@InProceedings{CaetanoMeloSantSchw:2017:AcReBa,
               author = "Caetano, Carlos and Melo, Victor H. C. de and Santos, Jefersson A. 
                         dos and Schwartz, William Robson",
          affiliation = "{Universidade Federal de Minas Gerais} and {Universidade Federal 
                         de Minas Gerais} and {Universidade Federal de Minas Gerais} and 
                         {Universidade Federal de Minas Gerais}",
                title = "Activity Recognition based on a Magnitude-Orientation Stream 
                         Network",
            booktitle = "Proceedings...",
                 year = "2017",
               editor = "Torchelsen, Rafael Piccin and Nascimento, Erickson Rangel do and 
                         Panozzo, Daniele and Liu, Zicheng and Farias, Myl{\`e}ne and 
                          Vieira, Thales and Sacht, Leonardo and Ferreira, Nivan and Comba, 
                         Jo{\~a}o Luiz Dihl and Hirata, Nina and Schiavon Porto, Marcelo 
                         and Vital, Creto and Pagot, Christian Azambuja and Petronetto, 
                         Fabiano and Clua, Esteban and Cardeal, Fl{\'a}vio",
         organization = "Conference on Graphics, Patterns and Images, 30. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Magnitude, Orientation, Stream Network, Convolutional Neural 
                         Networks.",
             abstract = "The temporal component of videos provides an important clue for 
                         activity recognition, as a number of activities can be reliably 
                         recognized based on the motion information. In view of that, this 
                         work proposes a novel temporal stream for two-stream convolutional 
                         networks based on images computed from the optical flow magnitude 
                         and orientation, named Magnitude-Orientation Stream (MOS), to 
                          learn the motion in a richer manner. Our method applies 
                         simple nonlinear transformations on the vertical and horizontal 
                         components of the optical flow to generate input images for the 
                          temporal stream. Experimental results, carried out on two well-known 
                         datasets (HMDB51 and UCF101), demonstrate that using our proposed 
                         temporal stream as input to existing neural network architectures 
                          can improve their performance for activity recognition. Results 
                          also show that our temporal stream provides complementary 
                          information that improves the classical two-stream methods, 
                          indicating that our approach is well suited as a temporal 
                          video representation.",
  conference-location = "Niter{\'o}i, RJ, Brazil",
      conference-year = "17-20 Oct. 2017",
                  doi = "10.1109/SIBGRAPI.2017.13",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2017.13",
             language = "en",
                  ibi = "8JMKD3MGPAW/3PF6LMS",
                  url = "http://urlib.net/ibi/8JMKD3MGPAW/3PF6LMS",
           targetfile = "main Certified by IEEE PDF eXpress.pdf",
        urlaccessdate = "2024, Apr. 29"
}
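
The abstract describes generating temporal-stream inputs from the optical
flow magnitude and orientation. As a rough illustration only, here is a
minimal Python sketch, assuming OpenCV's Farneback dense optical flow and
a simple min-max rescaling; the paper's exact nonlinear transformations
of the horizontal and vertical flow components are not reproduced here.

# Illustrative sketch, not the authors' implementation: build magnitude
# and orientation images from dense optical flow between two frames.
import cv2
import numpy as np

def magnitude_orientation_images(prev_gray, next_gray):
    # Dense optical flow between consecutive grayscale frames
    # (Farneback is an assumption; any dense flow method would do).
    flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    u, v = flow[..., 0], flow[..., 1]  # horizontal and vertical components
    # Magnitude and orientation of each flow vector.
    mag, ang = cv2.cartToPolar(u, v, angleInDegrees=True)
    # Rescale to the 8-bit image range (a stand-in for the paper's
    # nonlinear transformations).
    mag_img = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    ang_img = (ang / 360.0 * 255.0).astype(np.uint8)
    return mag_img, ang_img

Stacking such magnitude and orientation images over several consecutive
frames would then form the input to the temporal stream of a two-stream
network, as described in the abstract.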

